1 Setup

1.1 Imports

## Load packages
suppressMessages(library(tidyverse))
suppressMessages(library(lmerTest))
suppressMessages(library(MuMIn))
suppressMessages(library(broom.mixed))
suppressMessages(library(lavaan))
suppressMessages(library(reshape2))
suppressMessages(library(psych))
suppressMessages(library(glue))
suppressMessages(library(optimx))

# Read local files
source("utils.R")

1.2 Parameters

# Region reading time limits (all in ms).
# FIX: use `<-` for assignment rather than `=`, per R convention.
rt.rel.sd <- 2           # flag RTs more than 2 SD from a participant's mean
rt.abs.min.pv <- 300     # ms
rt.abs.max.pv <- 3000    # ms
rt.abs.min.wm <- 200     # ms
rt.abs.max.wm <- 3000    # ms
rt.abs.max.irq <- 120000 # ms
rt.abs.min.irq <- 300    # ms

# Attention accuracy cutoff (retain if accuracy >= val)
cutoff.pv.accuracy <- 0.66
cutoff.wm.accuracy <- 0.66
cutoff.irq.attention <- 0.66

# Exclude a participant if more than this proportion of trials was excluded
cutoff.removed.trials <- 0.34

# lmer/glmer control objects: use the optimx nlminb optimizer to help
# convergence of the mixed models fitted below.
optimxlmerControl <- lmerControl(optimizer = "optimx", optCtrl = list(method = "nlminb"))

optimxglmerControl <- glmerControl(optimizer = "optimx", optCtrl = list(method = "nlminb"))

1.3 Read in data

# Read the raw experiment exports from the data directory
ppts.raw <- read.csv(file.path("data", "vipv_participant.csv"))
rating <- read.csv(file.path("data", "vipv_rating.csv"))
binary <- read.csv(file.path("data", "vipv_binary.csv"))
irq.og <- read.csv(file.path("data", "irq.csv"))

1.4 Filter by study

# Preprocess participants for the pilot study
ppts.all <- preprocess.ppts(ppts.raw, "pilot")

# Property-verification trials for retained participants, with numeric accuracy
pv.all <- binary %>%
  filter(task == "PV",
         participant_id %in% ppts.all$id) %>%
  mutate(accuracy = if_else(is_correct == "True", 1, 0))

pv.critical.all <- pv.all %>% filter(item_type == "critical")

# Working-memory trials for retained participants, with numeric accuracy
wm.all <- binary %>%
  filter(task == "WM",
         participant_id %in% ppts.all$id) %>%
  mutate(accuracy = if_else(is_correct == "True", 1, 0))

wm.critical.all <- wm.all %>% filter(item_type == "critical")

# IRQ ratings for retained participants
irq.all <- rating %>%
  filter(scale == "IRQ",
         participant_id %in% ppts.all$id)

# Drop raw tables that are no longer needed
rm(ppts.raw, binary, rating)

2 Preprocess

2.1 Participant Exclusions

2.1.1 Demographics

# Flag participants for demographic exclusion (non-native English speakers,
# vision problems). FIX: spell out TRUE rather than the reassignable
# shorthand T.
ppts.all <- exclude.ppts(ppts.all, native.eng = TRUE, vision = TRUE)

2.1.2 PV Accuracy

# Flag participants whose PV accuracy falls below the cutoff
ppts.all <- exclude.ppts.accuracy(ppts.all, pv.all,  "pv", cutoff.pv.accuracy)

95% of ppts got > 66% of PV trials correct.

# Histogram of participant-level PV accuracy, coloured by exclusion status
ppts.all %>%
  ggplot(aes(x = pv.accuracy, fill = excluded)) +
  geom_histogram(bins = 30) +
  scale_fill_manual(
    "Excluded",
    labels = c("Retained", "Excluded"),
    values = c("#1eb809", "#cc0502")
  ) +
  labs(x = "PV Accuracy", y = "No. Participants") +
  theme_minimal()

Participants who scored <66%:

# List participants below the PV accuracy cutoff.
# FIX: use the cutoff parameter defined in Setup rather than a hard-coded
# 0.66, so the listing stays in sync if the cutoff changes.
ppts.all %>%
  filter(pv.accuracy < cutoff.pv.accuracy) %>%
  select(participant_id, pv.accuracy)
participant_id pv.accuracy
15 0.4739583

2.1.3 WM Accuracy

# Flag participants whose WM accuracy falls below the cutoff
ppts.all <- exclude.ppts.accuracy(ppts.all, wm.all,  "wm", cutoff.wm.accuracy)

95% of ppts got > 66% of WM trials correct.

# Histogram of participant-level WM accuracy, coloured by exclusion status
# (layer order matches the PV accuracy plot above; order does not affect
# the rendered result).
ppts.all %>%
  ggplot(aes(x = wm.accuracy, fill = excluded)) +
  geom_histogram(bins = 30) +
  scale_fill_manual(
    "Excluded",
    labels = c("Retained", "Excluded"),
    values = c("#1eb809", "#cc0502")
  ) +
  labs(x = "WM Accuracy", y = "No. Participants") +
  theme_minimal()

Participants who scored <66%:

# List participants below the WM accuracy cutoff.
# FIX: use the cutoff parameter defined in Setup rather than a hard-coded
# 0.66, so the listing stays in sync if the cutoff changes.
ppts.all %>%
  filter(wm.accuracy < cutoff.wm.accuracy) %>%
  select(participant_id, wm.accuracy)
participant_id wm.accuracy
15 0.4814815

2.1.4 Summarize exclusions

# Retain only participants not flagged for exclusion
ppts <- ppts.all %>% filter(excluded == 0)

# Tabulate participant exclusions by reason
summarise.exclusions(ppts.all)
Reason Removed (%)
ex.native_eng 0 0.0
ex.vision 0 0.0
ex.pv.accuracy 1 4.8
ex.wm.accuracy 0 0.0
—— NA NA
Total Removed 1 4.8
Retained 20 95.2

2.2 Trial Exclusions

2.2.1 Property Verification

Red dotted lines indicate exclusion criteria at 300ms and 3000ms.

# PV reaction-time distribution on a log x-axis, with the absolute
# exclusion bounds marked as dashed red lines.
pv.critical.all %>%
  merge(ppts.all %>% select(participant_id, excluded)) %>%
  ggplot(aes(x = reaction_time / 1000, fill = excluded)) +
  geom_histogram(bins = 30) +
  geom_vline(xintercept = rt.abs.min.pv / 1000, color = "red", linetype = "dashed") +
  geom_vline(xintercept = rt.abs.max.pv / 1000, color = "red", linetype = "dashed") +
  scale_x_log10() +
  scale_fill_manual(
    "Excluded",
    labels = c("Retained", "Excluded"),
    values = c("#1eb809", "#cc0502")
  ) +
  labs(x = "Property verification reaction times (s)", y = "No. Trials") +
  theme_minimal()

# Binned PV accuracy as a function of reaction time (log x-axis),
# with the absolute exclusion bounds marked.
pv.critical.all %>%
  ggplot(aes(x = reaction_time, y = accuracy)) +
  stat_summary_bin(fun.data = mean_cl_boot, geom = "point") +
  stat_summary_bin(fun.data = mean_cl_boot, geom = "errorbar") +
  geom_smooth(method = "lm", formula = "y~x") +
  geom_vline(xintercept = rt.abs.min.pv, color = "red", linetype = "dashed") +
  geom_vline(xintercept = rt.abs.max.pv, color = "red", linetype = "dashed") +
  scale_x_log10() +
  theme_minimal() +
  labs()

PV trial exclusion summary:

# Apply trial-level exclusions to critical PV trials:
#   1. drop trials from already-excluded participants
#   2. flag RTs relative to each participant (rt.rel.sd SDs — see utils.R)
#   3. flag RTs outside [rt.abs.min.pv, rt.abs.max.pv]
# FIX: removed the trailing empty argument in filter() and use `<-`
# consistently for assignment.
pv.critical.all <- pv.critical.all %>%
  filter(participant_id %in% ppts$id) %>%
  exclude.relative.by.ppt(reaction_time, rt.rel.sd) %>%
  exclude.absolute.by.ppt(reaction_time, rt.abs.min.pv, rt.abs.max.pv)

# Exclude participants who lost too many trials, and propagate the new
# exclusion flags back onto the trial data.
res <- exclude.ppt.ex.trials(
  ppts.all, pv.critical.all, cutoff.removed.trials, "ex.pv.ex.trials")

ppts.all <- res$ppts
pv.critical.all <- res$trials  # TODO: Hacky. Better method?

pv.critical <- pv.critical.all %>% filter(excluded == FALSE)

# Tabulate PV trial-level exclusions by reason
summarise.exclusions(pv.critical.all)
Reason Removed (%)
ex.reaction_time.rel.lo 0 0.0
ex.reaction_time.rel.hi 51 3.5
ex.reaction_time.abs.lo 0 0.0
ex.reaction_time.abs.hi 70 4.9
ex.ppt.excluded 0 0.0
—— NA NA
Total Removed 121 8.4
Retained 1319 91.6

Remove excluded participants:

# Refresh the retained-participant table after trial-based exclusions
ppts <- ppts.all %>% filter(excluded == 0)

2.2.2 Working Memory

Red dotted lines indicate exclusion criteria at 200ms and 3000ms.

# WM reaction-time distribution on a log x-axis, with the absolute
# exclusion bounds marked as dashed red lines.
wm.critical.all %>%
  merge(ppts.all %>% select(participant_id, excluded)) %>%
  ggplot(aes(x = reaction_time / 1000, fill = excluded)) +
  geom_histogram(bins = 30) +
  geom_vline(xintercept = rt.abs.min.wm / 1000, color = "red", linetype = "dashed") +
  geom_vline(xintercept = rt.abs.max.wm / 1000, color = "red", linetype = "dashed") +
  scale_x_log10() +
  scale_fill_manual(
    "Excluded",
    labels = c("Retained", "Excluded"),
    values = c("#1eb809", "#cc0502")
  ) +
  labs(x = "WM RT (s)", y = "No. Trials") +
  theme_minimal()

# Binned WM accuracy as a function of reaction time (log x-axis),
# with the absolute exclusion bounds marked.
wm.critical.all %>%
  ggplot(aes(x = reaction_time, y = accuracy)) +
  stat_summary_bin(fun.data = mean_cl_boot, geom = "point") +
  stat_summary_bin(fun.data = mean_cl_boot, geom = "errorbar") +
  geom_smooth(method = "lm", formula = "y~x") +
  geom_vline(xintercept = rt.abs.min.wm, color = "red", linetype = "dashed") +
  geom_vline(xintercept = rt.abs.max.wm, color = "red", linetype = "dashed") +
  scale_x_log10() +
  theme_minimal() +
  labs()

WM trial exclusion summary:

# Apply trial-level exclusions to critical WM trials (same pipeline as the
# PV section above).
# FIX: removed the trailing empty argument in filter() and use `<-` for
# assignment.
wm.critical.all <- wm.critical.all %>%
  filter(participant_id %in% ppts$id) %>%
  exclude.relative.by.ppt(reaction_time, rt.rel.sd) %>%
  exclude.absolute.by.ppt(reaction_time, rt.abs.min.wm, rt.abs.max.wm)

res <- exclude.ppt.ex.trials(
  ppts.all, wm.critical.all, cutoff.removed.trials, "ex.wm.ex.trials")

ppts.all <- res$ppts
# BUG FIX: this previously read `corsi.all <- res$trials` (apparent
# copy-paste from another study), so `wm.critical.all` never received the
# ex.ppt.excluded flags — note the missing `ex.ppt.excluded` row in the WM
# exclusion summary below, unlike the PV summary. Assign back to
# wm.critical.all, mirroring the PV section.
wm.critical.all <- res$trials

wm.critical <- wm.critical.all %>% filter(excluded == FALSE)

# Tabulate WM trial-level exclusions by reason
summarise.exclusions(wm.critical.all)
Reason Removed (%)
ex.reaction_time.rel.lo 0 0.0
ex.reaction_time.rel.hi 50 5.2
ex.reaction_time.abs.lo 6 0.6
ex.reaction_time.abs.hi 11 1.1
—— NA NA
Total Removed 67 7.0
Retained 893 93.0

Remove excluded participants:

# Refresh the retained-participant table after WM trial-based exclusions
ppts <- ppts.all %>% filter(excluded == 0)

2.3 Item Analysis

# Distribution of by-item PV accuracy, expressed as a percentage of items.
# FIX: use after_stat() instead of the deprecated `..count..` notation
# (deprecated since ggplot2 3.4.0).
pv.critical %>%
  group_by(item) %>%
  summarize(accuracy = mean(accuracy)) %>%
  ggplot(aes(x = accuracy)) +
  geom_histogram(aes(y = after_stat(count / sum(count) * 100)), bins = 10) +
  labs(y = "Proportion of items")

# Items ordered by mean accuracy, omitting items answered perfectly
pv.critical %>%
  group_by(item) %>%
  summarize(accuracy = mean(accuracy)) %>%
  filter(accuracy < 1) %>%
  ggplot(aes(x = reorder(item, -accuracy), y = accuracy)) +
  geom_point()

# TODO: merge w/ stimuli data

# Ten lowest-accuracy items, with mean RT for context
pv.critical %>%
  group_by(item) %>%
  summarize(
    reaction_time = mean(reaction_time),
    accuracy = mean(accuracy)
  ) %>%
  arrange(accuracy) %>%
  slice_head(n = 10)
item reaction_time accuracy
47 1626.000 0.1666667
65 1508.579 0.4736842
72 1873.353 0.5294118
69 1742.933 0.5333333
14 1506.222 0.5555556
17 1602.947 0.6315789
29 1749.947 0.6315789
70 1564.833 0.6666667
56 1847.250 0.6875000
36 1717.056 0.7222222

3 Interference Task

3.1 Preliminary

3.1.1 Merge Dual Task Data

# WM columns for the dual-task merge, renamed with a wm. prefix so they
# do not clash with the PV columns after merging.
wm.dt <- wm.critical %>%
  select(
    participant_id, trial_id, block_id,
    wm.modality = condition,
    wm.load = version,
    wm.reaction_time = reaction_time,
    wm.accuracy = accuracy
  ) %>%
  mutate(wm.load = factor(wm.load))

# Merge PV and WM trials into one dual-task data frame (inner join on
# trial/block/participant), prefix the PV columns, and derive modelling
# variables (log RTs, centred log RTs, combined dual-task accuracy).
# FIX: spell out `all = FALSE`, and remove the trailing commas left in
# rename() and mutate().
vipv <- pv.critical %>%
  mutate(block_id = as.integer(block_id)) %>%
  merge(wm.dt, by = c("trial_id", "block_id", "participant_id"), all = FALSE) %>%
  rename(
    pv.modality = condition,
    pv.accuracy = accuracy,
    pv.reaction_time = reaction_time
  ) %>%
  mutate(
    pv.modality = factor(pv.modality, levels = c("vi", "au")),
    wm.modality = factor(wm.modality, levels = c("vi", "au")),
    acc.dt = pv.accuracy + wm.accuracy,  # 0-2: how many of the two tasks were correct
    pv.rt.log = log(pv.reaction_time),
    pv.rt.log.c = scale(pv.rt.log),
    wm.rt.log = log(wm.reaction_time),
    wm.rt.log.c = scale(wm.rt.log)
  ) %>%
  arrange(trial_id)

3.1.2 Speed-Accuracy Tradeoff

The raw relationship between RT and accuracy is negative, implying (to me) that there is no speed-accuracy trade-off(?). However, this could be driven by confounds: "easy" items (on which participants are both fast and accurate) or "good" participants (who are both fast and accurate).

# Binned speed-accuracy relationship across all dual-task trials
vipv %>%
  ggplot(aes(x = pv.reaction_time, y = pv.accuracy)) +
  stat_summary_bin(geom = "pointrange", bins = 20, fun.data = mean_se) +
  geom_smooth(method = "lm", formula = "y~x") +
  scale_x_log10()
## Warning: Removed 1 rows containing missing values (geom_segment).

We see the same trend within each condition combination

# Speed-accuracy relationship within each modality x load cell
vipv %>%
  ggplot(aes(x = pv.reaction_time, y = pv.accuracy, color = wm.load)) +
  facet_grid(cols = vars(pv.modality), rows = vars(wm.modality), labeller = "label_both") +
  stat_summary_bin(geom = "pointrange", bins = 20, fun.data = mean_se) +
  geom_smooth(method = "lm", formula = "y~x") +
  scale_x_log10() +
  theme_minimal()
## Warning: Removed 4 rows containing missing values (geom_segment).
## Warning: Removed 5 rows containing missing values (geom_segment).
## Removed 5 rows containing missing values (geom_segment).
## Warning: Removed 2 rows containing missing values (geom_segment).

Participants who are faster are indeed more accurate

# Participant-level means: do faster participants tend to be more accurate?
vipv %>%
  group_by(participant_id) %>%
  summarize(
    pv.reaction_time = mean(pv.reaction_time),
    pv.accuracy = mean(pv.accuracy)
  ) %>%
  ggplot(aes(x = pv.reaction_time, y = pv.accuracy)) +
  geom_point() +
  geom_smooth(method = "lm", formula = "y~x") +
  scale_x_log10() +
  labs(subtitle = "Grouped by participant")

And accuracy and rt of items was also negatively correlated.

# Item-level means: are faster items also the more accurate ones?
vipv %>%
  group_by(item) %>%
  summarize(
    pv.reaction_time = mean(pv.reaction_time),
    pv.accuracy = mean(pv.accuracy)
  ) %>%
  ggplot(aes(x = pv.reaction_time, y = pv.accuracy)) +
  geom_point() +
  geom_smooth(method = "lm", formula = "y~x") +
  scale_x_log10() +
  labs(subtitle = "Grouped by item")

I tried to account for this by finding the by-ppt and by-item z-scores for accuracy and rt. Both continue to show a downward trend although potentially there’s a slight peak on the by-ppt plot.

# Z-score accuracy and log RT within item and within participant, then
# compute the participant-wise Balanced Integration Score:
# BIS = z(accuracy) - z(log RT).
vipv <- vipv %>%
  group_by(item) %>%
  mutate(
    pv.accuracy.item.z = (pv.accuracy - mean(pv.accuracy)) / sd(pv.accuracy),
    pv.rt.log.item.z = (pv.rt.log - mean(pv.rt.log)) / sd(pv.rt.log)
  ) %>%
  ungroup() %>%
  group_by(participant_id) %>%
  mutate(
    pv.accuracy.ppt.z = (pv.accuracy - mean(pv.accuracy)) / sd(pv.accuracy),
    pv.rt.log.ppt.z = (pv.rt.log - mean(pv.rt.log)) / sd(pv.rt.log)
  ) %>%
  ungroup() %>%
  mutate(pv.bis.ppt = pv.accuracy.ppt.z - pv.rt.log.ppt.z)
# By-participant z-scores: binned RT-accuracy relationship
vipv %>%
  ggplot(aes(x = pv.rt.log.ppt.z, y = pv.accuracy.ppt.z)) +
  stat_summary_bin(geom = "pointrange", bins = 20, fun.data = mean_se) +
  geom_smooth(method = "lm", formula = "y~x")
## Warning: Removed 62 rows containing non-finite values (stat_summary_bin).
## Warning: Removed 62 rows containing non-finite values (stat_smooth).
## Warning: Removed 2 rows containing missing values (geom_segment).

# By-item z-scores: binned RT-accuracy relationship.
# FIX: supply fun.data = mean_se explicitly (matching the by-participant
# plot) instead of relying on the default, which emitted
# "No summary function supplied, defaulting to `mean_se()`".
vipv %>%
  ungroup() %>%
  ggplot(aes(x = pv.rt.log.item.z, y = pv.accuracy.item.z)) +
  stat_summary_bin(geom = "pointrange", bins = 20, fun.data = mean_se) +
  geom_smooth(method = "lm", formula = "y~x")
## Warning: Removed 476 rows containing non-finite values (stat_summary_bin).
## No summary function supplied, defaulting to `mean_se()`
## Warning: Removed 476 rows containing non-finite values (stat_smooth).
## Warning: Removed 2 rows containing missing values (geom_segment).

3.2 Property Verification Effects

3.2.1 Reaction Time

For PV RT we see the opposite cross-over interaction in the high-load condition, compared to the one observed by Vermeulen et al. Ppts are faster to respond to trials where the modality of the WM and PV trials match.

# Mean (log) PV RT per condition cell, restricted to trials where both
# tasks were answered correctly.
vipv %>%
  filter(wm.accuracy == 1, pv.accuracy == 1) %>%
  ggplot(aes(x = pv.modality, y = pv.rt.log, color = as.factor(wm.modality))) +
  stat_summary(fun = "mean", geom = "point", size = 5, position = position_dodge(width = 1)) +
  stat_summary(fun.data = "mean_se", geom = "errorbar", position = position_dodge(width = 1)) +
  facet_grid(cols = vars(wm.load), labeller = label_both) +
  theme() +
  labs(
    x = "PV Modality",
    y = "PV Reaction Time (log)",
    color = "WM Modality"
  )

The 3-way interaction has a non-significant, negative effect on RTs.

# Baseline PV RT model: all main effects and 2-way interactions, but no
# 3-way term. Fitted by ML (REML=F) so the models can be compared by LRT.
# Only trials correct on both tasks are analysed.
m.vipv.pv.rt.base <- lmer(
  pv.rt.log ~  pv.modality * wm.modality + 
    pv.modality * wm.load + wm.modality * wm.load +
      (1 | participant_id) + (1 | item_id),
  data = vipv %>%
  filter(wm.accuracy == 1,
         pv.accuracy == 1), REML=F)

# Full model: adds the pv.modality x wm.modality x wm.load 3-way interaction.
m.vipv.pv.rt <- lmer(
  pv.rt.log ~ pv.modality * wm.modality * wm.load +
              (1 | participant_id) + (1 | item_id),
  data = vipv %>%
  filter(wm.accuracy == 1,
         pv.accuracy == 1), REML=F)

summary(m.vipv.pv.rt)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: 
## pv.rt.log ~ pv.modality * wm.modality * wm.load + (1 | participant_id) +  
##     (1 | item_id)
##    Data: vipv %>% filter(wm.accuracy == 1, pv.accuracy == 1)
## 
##      AIC      BIC   logLik deviance df.resid 
##     96.4    146.5    -37.2     74.4      689 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.1724 -0.6633 -0.0587  0.5589  3.1892 
## 
## Random effects:
##  Groups         Name        Variance Std.Dev.
##  item_id        (Intercept) 0.01384  0.1176  
##  participant_id (Intercept) 0.03180  0.1783  
##  Residual                   0.05383  0.2320  
## Number of obs: 700, groups:  item_id, 48; participant_id, 20
## 
## Fixed effects:
##                                        Estimate Std. Error         df t value
## (Intercept)                            7.222696   0.053352  53.139006 135.379
## pv.modalityau                          0.021376   0.049588 120.453682   0.431
## wm.modalityau                          0.030580   0.036353 651.247657   0.841
## wm.load3                              -0.041210   0.036728 653.536613  -1.122
## pv.modalityau:wm.modalityau           -0.047143   0.051382 652.189901  -0.918
## pv.modalityau:wm.load3                 0.008185   0.051268 650.476869   0.160
## wm.modalityau:wm.load3                 0.031695   0.051444 658.292137   0.616
## pv.modalityau:wm.modalityau:wm.load3  -0.041058   0.073240 656.550607  -0.561
##                                      Pr(>|t|)    
## (Intercept)                            <2e-16 ***
## pv.modalityau                           0.667    
## wm.modalityau                           0.401    
## wm.load3                                0.262    
## pv.modalityau:wm.modalityau             0.359    
## pv.modalityau:wm.load3                  0.873    
## wm.modalityau:wm.load3                  0.538    
## pv.modalityau:wm.modalityau:wm.load3    0.575    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) pv.mdl wm.mdl wm.ld3 pv.m:. pv.:.3 wm.:.3
## pv.modality -0.475                                          
## wm.modality -0.355  0.383                                   
## wm.load3    -0.347  0.375  0.518                            
## pv.mdlty:w.  0.252 -0.516 -0.709 -0.370                     
## pv.mdlty:.3  0.250 -0.508 -0.373 -0.719  0.504              
## wm.mdlty:.3  0.253 -0.274 -0.710 -0.732  0.505  0.526       
## pv.mdl:.:.3 -0.179  0.362  0.501  0.518 -0.706 -0.720 -0.705
# Likelihood-ratio test for the 3-way interaction (full vs. base RT model)
a.vipv.pv.rt <-  anova(m.vipv.pv.rt.base, m.vipv.pv.rt)
a.vipv.pv.rt
npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
m.vipv.pv.rt.base 10 94.70508 140.2159 -37.35254 74.70508 NA NA NA
m.vipv.pv.rt 11 96.39114 146.4530 -37.19557 74.39114 0.3139382 1 0.5752735

3.2.2 Accuracy

PV accuracy appears to show a 2-way interaction in the expected direction, with accuracy generally lower for same-modality trials in both the low and high-load conditions.

# Mean PV accuracy per condition cell, restricted to trials with a correct
# WM response.
vipv %>%
  filter(wm.accuracy == 1) %>%
  ggplot(aes(x = pv.modality, y = pv.accuracy, color = as.factor(wm.modality))) +
  stat_summary(fun = "mean", geom = "point", size = 5, position = position_dodge(width = 1)) +
  stat_summary(fun.data = "mean_se", geom = "errorbar", position = position_dodge(width = 1)) +
  facet_grid(cols = vars(wm.load), labeller = label_value) +
  theme() +
  labs(
    x = "PV Modality",
    y = "PV Accuracy",
    color = "WM Modality"
  )

A linear model shows no 2-way interaction (or any other effects).

# Baseline PV accuracy model (logistic): main effects + 2-way interactions.
# Restricted to trials with a correct WM response; uses the optimx/nlminb
# optimizer defined in Setup.
m.vipv.pv.acc.base <- glmer(
    pv.accuracy ~ 1 + pv.modality + wm.modality + wm.load + 
    pv.modality:wm.modality + wm.modality:wm.load + pv.modality:wm.load +
              (1 | participant_id) + (1 | item_id),
  data = vipv %>%
    filter(wm.accuracy == 1),
  family="binomial", control=optimxglmerControl)

# Full model: adds the 3-way interaction.
m.vipv.pv.acc <- glmer(
    pv.accuracy ~ 1 + pv.modality + wm.modality + wm.load + 
    pv.modality:wm.modality + wm.modality:wm.load + pv.modality:wm.load +
    pv.modality:wm.modality:wm.load +
              (1 | participant_id) + (1 | item_id),
    data = vipv %>%
      filter(wm.accuracy == 1),
    family="binomial", control=optimxglmerControl)

summary(m.vipv.pv.acc)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: 
## pv.accuracy ~ 1 + pv.modality + wm.modality + wm.load + pv.modality:wm.modality +  
##     wm.modality:wm.load + pv.modality:wm.load + pv.modality:wm.modality:wm.load +  
##     (1 | participant_id) + (1 | item_id)
##    Data: vipv %>% filter(wm.accuracy == 1)
## Control: optimxglmerControl
## 
##      AIC      BIC   logLik deviance df.resid 
##    410.4    457.0   -195.2    390.4      776 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -6.8547  0.0450  0.0975  0.1880  3.2077 
## 
## Random effects:
##  Groups         Name        Variance Std.Dev.
##  item_id        (Intercept) 7.882    2.808   
##  participant_id (Intercept) 1.426    1.194   
## Number of obs: 786, groups:  item_id, 48; participant_id, 20
## 
## Fixed effects:
##                                      Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                           4.70721    0.98846   4.762 1.92e-06 ***
## pv.modalityau                        -0.76484    1.11833  -0.684    0.494    
## wm.modalityau                        -0.02312    0.66705  -0.035    0.972    
## wm.load3                             -0.27648    0.63688  -0.434    0.664    
## pv.modalityau:wm.modalityau           0.13207    0.89013   0.148    0.882    
## wm.modalityau:wm.load3                0.96221    0.98268   0.979    0.327    
## pv.modalityau:wm.load3                0.86039    0.88375   0.974    0.330    
## pv.modalityau:wm.modalityau:wm.load3 -1.09267    1.31435  -0.831    0.406    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) pv.mdl wm.mdl wm.ld3 pv.m:. wm.:.3 pv.:.3
## pv.modality -0.589                                          
## wm.modality -0.339  0.291                                   
## wm.load3    -0.352  0.299  0.488                            
## pv.mdlty:w.  0.266 -0.389 -0.757 -0.367                     
## wm.mdlty:.3  0.281 -0.213 -0.695 -0.682  0.517              
## pv.mdlty:.3  0.286 -0.380 -0.363 -0.729  0.500  0.505       
## pv.mdl:.:.3 -0.224  0.270  0.531  0.518 -0.675 -0.760 -0.704
# Likelihood-ratio test for the 3-way interaction (full vs. base accuracy model)
a.vipv.pv.acc <- anova(m.vipv.pv.acc.base, m.vipv.pv.acc)
a.vipv.pv.acc
npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
m.vipv.pv.acc.base 9 409.0486 451.0512 -195.5243 391.0486 NA NA NA
m.vipv.pv.acc 10 410.3519 457.0215 -195.1760 390.3519 0.6967111 1 0.403891

3.2.3 Balanced Integration of Scores

In an attempt to combine RT and Accuracy, I used the Balanced Integration of Scores (BIS) (Liesefeld & Janczyk, 2018), essentially ppt-wise z(acc) - z(rt).

It seems to mostly be driven by RT, showing the same rough pattern of results (better performance on same-modality trials).

# Mean PV BIS per condition cell, restricted to trials with a correct WM
# response.
vipv %>%
  filter(wm.accuracy == 1) %>%
  ggplot(aes(x = pv.modality, y = pv.bis.ppt, color = as.factor(wm.modality))) +
  stat_summary(fun = "mean", geom = "point", size = 5, position = position_dodge(width = 1)) +
  stat_summary(fun.data = "mean_se", geom = "errorbar", position = position_dodge(width = 1)) +
  facet_grid(cols = vars(wm.load), labeller = label_both) +
  theme() +
  labs(
    x = "PV Modality",
    y = "PV BIS",
    color = "WM Modality"
  )
## Warning: Removed 57 rows containing non-finite values (stat_summary).
## Removed 57 rows containing non-finite values (stat_summary).

No significant effects.

# Baseline BIS model: main effects + 2-way interactions (ML fit).
# NOTE(review): both fits report boundary (singular) fits below — the
# participant random-intercept variance is estimated at zero (see the
# random-effects table in the summary).
m.vipv.pv.bis.base <- lmer(
  pv.bis.ppt ~ 1 + pv.modality + wm.modality + wm.load +
      pv.modality:wm.modality + wm.modality:wm.load + pv.modality:wm.load +
      (1 | participant_id) + (1 | item_id),
  data = vipv %>%
    filter(wm.accuracy == 1), REML=F)
## boundary (singular) fit: see help('isSingular')
# Full model: adds the 3-way interaction.
m.vipv.pv.bis <- lmer(
  pv.bis.ppt ~ 1 + pv.modality + wm.modality + wm.load + 
    pv.modality:wm.modality + wm.modality:wm.load + pv.modality:wm.load + 
    pv.modality:wm.modality:wm.load +
              (1 | participant_id) + (1 | item_id),
  data = vipv %>%
    filter(wm.accuracy == 1), REML=F)
## boundary (singular) fit: see help('isSingular')
summary(m.vipv.pv.bis)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: 
## pv.bis.ppt ~ 1 + pv.modality + wm.modality + wm.load + pv.modality:wm.modality +  
##     wm.modality:wm.load + pv.modality:wm.load + pv.modality:wm.modality:wm.load +  
##     (1 | participant_id) + (1 | item_id)
##    Data: vipv %>% filter(wm.accuracy == 1)
## 
##      AIC      BIC   logLik deviance df.resid 
##   2476.1   2526.6  -1227.0   2454.1      718 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -4.1167 -0.5397  0.1048  0.6036  3.6015 
## 
## Random effects:
##  Groups         Name        Variance Std.Dev.
##  item_id        (Intercept) 0.8319   0.9121  
##  participant_id (Intercept) 0.0000   0.0000  
##  Residual                   1.4617   1.2090  
## Number of obs: 729, groups:  item_id, 48; participant_id, 18
## 
## Fixed effects:
##                                        Estimate Std. Error         df t value
## (Intercept)                            0.074414   0.228992  86.665046   0.325
## pv.modalityau                         -0.285895   0.319275  82.041512  -0.895
## wm.modalityau                         -0.067562   0.187214 688.335785  -0.361
## wm.load3                               0.171609   0.186248 688.128275   0.921
## pv.modalityau:wm.modalityau            0.007266   0.258522 688.884962   0.028
## wm.modalityau:wm.load3                -0.030716   0.263751 691.066143  -0.116
## pv.modalityau:wm.load3                 0.034346   0.258473 687.458494   0.133
## pv.modalityau:wm.modalityau:wm.load3   0.134498   0.368936 689.849370   0.365
##                                      Pr(>|t|)
## (Intercept)                             0.746
## pv.modalityau                           0.373
## wm.modalityau                           0.718
## wm.load3                                0.357
## pv.modalityau:wm.modalityau             0.978
## wm.modalityau:wm.load3                  0.907
## pv.modalityau:wm.load3                  0.894
## pv.modalityau:wm.modalityau:wm.load3    0.716
## 
## Correlation of Fixed Effects:
##             (Intr) pv.mdl wm.mdl wm.ld3 pv.m:. wm.:.3 pv.:.3
## pv.modality -0.717                                          
## wm.modality -0.421  0.302                                   
## wm.load3    -0.417  0.299  0.518                            
## pv.mdlty:w.  0.305 -0.403 -0.724 -0.375                     
## wm.mdlty:.3  0.300 -0.215 -0.713 -0.723  0.516              
## pv.mdlty:.3  0.300 -0.397 -0.373 -0.721  0.502  0.521       
## pv.mdl:.:.3 -0.215  0.282  0.509  0.517 -0.702 -0.715 -0.715
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Likelihood-ratio test for the 3-way interaction (full vs. base BIS model)
a.vipv.pv.bis <- anova(m.vipv.pv.bis.base, m.vipv.pv.bis)
a.vipv.pv.bis
npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
m.vipv.pv.bis.base 10 2474.202 2520.119 -1227.101 2454.202 NA NA NA
m.vipv.pv.bis 11 2476.070 2526.578 -1227.035 2454.070 0.1328024 1 0.7155439

3.3 Working Memory Effects

3.3.1 Reaction Time

WM RTs also show what could be a crossover in the high-load condition, again with faster RTs for same-modality trials.

# Mean (log) WM RT per condition cell, restricted to trials where both
# tasks were answered correctly.
vipv %>%
  filter(wm.accuracy == 1, pv.accuracy == 1) %>%
  ggplot(aes(x = pv.modality, y = wm.rt.log, color = as.factor(wm.modality))) +
  stat_summary(fun = "mean", geom = "point", size = 5, position = position_dodge(width = 1)) +
  stat_summary(fun.data = "mean_se", geom = "errorbar", position = position_dodge(width = 1)) +
  facet_grid(cols = vars(wm.load), labeller = label_value) +
  theme() +
  labs(
    x = "PV Modality",
    y = "WM Reaction Time (log s)",
    color = "WM Modality"
  )

The 3-way interaction has a negative effect which is not significant (p=0.23).

# Baseline WM RT model: main effects + 2-way interactions (ML fit), on
# trials correct on both tasks.
# NOTE(review): both fits are singular — the item random-intercept
# variance is estimated at zero (see the summary below).
m.vipv.wm.rt.base <- lmer(
  wm.rt.log ~ 1 + pv.modality + wm.modality + wm.load +
      pv.modality:wm.modality + wm.modality:wm.load + pv.modality:wm.load +
      (1 | participant_id) + (1 | item_id),
  data = vipv %>%
  filter(wm.accuracy == 1,
         pv.accuracy == 1), REML=F)
## boundary (singular) fit: see help('isSingular')
# Full model: adds the 3-way interaction.
m.vipv.wm.rt <- lmer(
  wm.rt.log ~ 1 + pv.modality + wm.modality + wm.load + 
    pv.modality:wm.modality + wm.modality:wm.load + pv.modality:wm.load + 
    pv.modality:wm.modality:wm.load +
              (1 | participant_id) + (1 | item_id),
  data = vipv %>%
  filter(wm.accuracy == 1,
         pv.accuracy == 1), REML=F)
## boundary (singular) fit: see help('isSingular')
summary(m.vipv.wm.rt)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: 
## wm.rt.log ~ 1 + pv.modality + wm.modality + wm.load + pv.modality:wm.modality +  
##     wm.modality:wm.load + pv.modality:wm.load + pv.modality:wm.modality:wm.load +  
##     (1 | participant_id) + (1 | item_id)
##    Data: vipv %>% filter(wm.accuracy == 1, pv.accuracy == 1)
## 
##      AIC      BIC   logLik deviance df.resid 
##    860.6    910.7   -419.3    838.6      689 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.3806 -0.6801 -0.0311  0.6325  3.2576 
## 
## Random effects:
##  Groups         Name        Variance Std.Dev.
##  item_id        (Intercept) 0.00000  0.0000  
##  participant_id (Intercept) 0.03897  0.1974  
##  Residual                   0.18256  0.4273  
## Number of obs: 700, groups:  item_id, 48; participant_id, 20
## 
## Fixed effects:
##                                        Estimate Std. Error         df t value
## (Intercept)                            6.536556   0.064548  66.203990 101.267
## pv.modalityau                          0.014769   0.064593 680.516201   0.229
## wm.modalityau                         -0.032284   0.064999 680.021736  -0.497
## wm.load3                              -0.060652   0.065646 680.663989  -0.924
## pv.modalityau:wm.modalityau           -0.003506   0.091743 680.680192  -0.038
## wm.modalityau:wm.load3                 0.048504   0.091003 679.973265   0.533
## pv.modalityau:wm.load3                 0.050945   0.091976 681.357950   0.554
## pv.modalityau:wm.modalityau:wm.load3  -0.158006   0.130061 681.253933  -1.215
##                                      Pr(>|t|)    
## (Intercept)                            <2e-16 ***
## pv.modalityau                           0.819    
## wm.modalityau                           0.620    
## wm.load3                                0.356    
## pv.modalityau:wm.modalityau             0.970    
## wm.modalityau:wm.load3                  0.594    
## pv.modalityau:wm.load3                  0.580    
## pv.modalityau:wm.modalityau:wm.load3    0.225    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) pv.mdl wm.mdl wm.ld3 pv.m:. wm.:.3 pv.:.3
## pv.modality -0.531                                          
## wm.modality -0.528  0.527                                   
## wm.load3    -0.523  0.525  0.519                            
## pv.mdlty:w.  0.375 -0.706 -0.709 -0.371                     
## wm.mdlty:.3  0.377 -0.378 -0.714 -0.721  0.508              
## pv.mdlty:.3  0.375 -0.705 -0.371 -0.717  0.499  0.516       
## pv.mdl:.:.3 -0.266  0.500  0.501  0.508 -0.707 -0.702 -0.709
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Likelihood-ratio test for the 3-way interaction (full vs. base WM RT model)
a.vipv.wm.rt <- anova(m.vipv.wm.rt.base, m.vipv.wm.rt)
a.vipv.wm.rt
npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
m.vipv.wm.rt.base 10 860.0706 905.5814 -420.0353 840.0706 NA NA NA
m.vipv.wm.rt 11 860.5966 910.6585 -419.2983 838.5966 1.474016 1 0.2247131

3.3.2 Working Memory Accuracy

WM Accuracy seems to show a main effect of load (which makes sense).

# Mean WM accuracy per condition cell, restricted to trials with a correct
# PV response.
vipv %>%
  filter(pv.accuracy == 1) %>%
  ggplot(aes(x = pv.modality, y = wm.accuracy, color = as.factor(wm.modality))) +
  stat_summary(fun = "mean", geom = "point", size = 5, position = position_dodge(width = 1)) +
  stat_summary(fun.data = "mean_se", geom = "errorbar", position = position_dodge(width = 1)) +
  facet_grid(cols = vars(wm.load), labeller = label_value) +
  theme() +
  labs(
    x = "PV Modality",
    y = "WM Accuracy",
    color = "WM Modality"
  )

The load effect, however, is nonsignificant (p=0.36)

# Baseline WM accuracy model (logistic): main effects + 2-way interactions.
# Restricted to trials with a correct PV response; optimx/nlminb optimizer.
m.vipv.wm.acc.base <- glmer(
    wm.accuracy ~ 1 + pv.modality + wm.modality + wm.load + 
    pv.modality:wm.modality + wm.modality:wm.load + pv.modality:wm.load +
              (1 | participant_id) + (1 | item_id),
  data = vipv %>%
    filter(pv.accuracy == 1),
  family="binomial", control=optimxglmerControl)

# Full model: adds the 3-way interaction.
m.vipv.wm.acc <- glmer(
    wm.accuracy ~ 1 + pv.modality + wm.modality + wm.load + 
    pv.modality:wm.modality + wm.modality:wm.load + pv.modality:wm.load +
    pv.modality:wm.modality:wm.load +
              (1 | participant_id) + (1 | item_id),
    data = vipv %>%
    filter(pv.accuracy == 1),
  family="binomial", control=optimxglmerControl)

summary(m.vipv.wm.acc)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: 
## wm.accuracy ~ 1 + pv.modality + wm.modality + wm.load + pv.modality:wm.modality +  
##     wm.modality:wm.load + pv.modality:wm.load + pv.modality:wm.modality:wm.load +  
##     (1 | participant_id) + (1 | item_id)
##    Data: vipv %>% filter(pv.accuracy == 1)
## Control: optimxglmerControl
## 
##      AIC      BIC   logLik deviance df.resid 
##    291.7    337.7   -135.9    271.7      727 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -8.2790  0.1175  0.1613  0.2319  0.5676 
## 
## Random effects:
##  Groups         Name        Variance Std.Dev.
##  item_id        (Intercept) 0.5681   0.7537  
##  participant_id (Intercept) 0.5692   0.7544  
## Number of obs: 737, groups:  item_id, 48; participant_id, 20
## 
## Fixed effects:
##                                      Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                           3.75798    0.68542   5.483 4.19e-08 ***
## pv.modalityau                         0.35789    0.90644   0.395    0.693    
## wm.modalityau                         0.59758    0.95432   0.626    0.531    
## wm.load3                             -0.68847    0.75335  -0.914    0.361    
## pv.modalityau:wm.modalityau           0.10432    1.53791   0.068    0.946    
## wm.modalityau:wm.load3                0.03283    1.18269   0.028    0.978    
## pv.modalityau:wm.load3               -0.97142    1.05270  -0.923    0.356    
## pv.modalityau:wm.modalityau:wm.load3 -0.36073    1.78052  -0.203    0.839    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) pv.mdl wm.mdl wm.ld3 pv.m:. wm.:.3 pv.:.3
## pv.modality -0.589                                          
## wm.modality -0.543  0.435                                   
## wm.load3    -0.721  0.533  0.513                            
## pv.mdlty:w.  0.313 -0.558 -0.627 -0.317                     
## wm.mdlty:.3  0.460 -0.343 -0.803 -0.646  0.498              
## pv.mdlty:.3  0.460 -0.811 -0.381 -0.711  0.487  0.461       
## pv.mdl:.:.3 -0.271  0.488  0.543  0.427 -0.866 -0.665 -0.604
# Likelihood-ratio test: does the 3-way interaction improve fit?
# (Parenthesizing the assignment also prints the anova table.)
(a.vipv.wm.acc <- anova(m.vipv.wm.acc.base, m.vipv.wm.acc))
npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
m.vipv.wm.acc.base 9 289.7638 331.1871 -135.8819 271.7638 NA NA NA
m.vipv.wm.acc 10 291.7225 337.7483 -135.8612 271.7225 0.0413126 1 0.8389357

3.4 Adjust for multiple comparisons

# Benjamini-Hochberg correction across the four primary model comparisons
# (p-values come from the chi-square rows of the anova objects above)
test <- c("PV RT", "PV Acc", "WM RT", "WM Acc")
p.val <- c(
  a.vipv.pv.rt$`Pr(>Chisq)`[2],
  a.vipv.pv.acc$`Pr(>Chisq)`[2],
  a.vipv.wm.rt$`Pr(>Chisq)`[2],
  a.vipv.wm.acc$`Pr(>Chisq)`[2]
)
p.val.adj <- stats::p.adjust(p.val, method = "BH")

data.frame(test, p.val, p.val.adj)
test p.val p.val.adj
PV RT 0.5752735 0.7670314
PV Acc 0.4038910 0.7670314
WM RT 0.2247131 0.7670314
WM Acc 0.8389357 0.8389357

4 IRQ

4.1 Preprocess

# Correct responses for the IRQ attention-check (catch) items.
# NOTE(review): assumes item_ids 37-39 are the catch items — confirm
# against the IRQ stimulus file.
irq.attention.answers <- data.frame(
  item_id = c(37, 38, 39),
  correct_response = c(1,5,1))

# Standardize the label used for catch trials
irq.all <- irq.all %>%
  mutate(item_type = recode(item_type, catch = "attention"))

# Exclude participants below the attention-accuracy cutoff (cutoff.irq.attention)
ppts.all <- exclude.ppts.attention(ppts.all, irq.all, irq.attention.answers, "irq", cutoff.irq.attention)

100% of participants got at least 66% of the IRQ attention checks correct.

# Histogram of IRQ attention-check accuracy across participants
ggplot(ppts.all, aes(x = irq.attention.accuracy)) +
  geom_histogram() +
  theme_minimal() +
  labs(x = "IRQ Attention Accuracy", y = "No. Participants")
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.

Participants who failed IRQ attention check.

# Participants who failed the IRQ attention check.
# Uses the shared cutoff.irq.attention parameter (declared in Parameters)
# instead of a hard-coded 0.66, so this listing stays consistent with
# exclude.ppts.attention above if the cutoff ever changes.
ppts.all %>%
  filter(irq.attention.accuracy < cutoff.irq.attention) %>%
  select(participant_id, irq.attention.accuracy)
participant_id irq.attention.accuracy

IRQ Accuracy by question

# Per-item accuracy on the IRQ attention-check questions
merge(
  filter(irq.all, item_type == "attention"),
  irq.attention.answers
) %>%
  mutate(accuracy = as.numeric(response == correct_response)) %>%
  group_by(item_id) %>%
  summarize(accuracy = mean(accuracy), .groups = "drop")
item_id accuracy
37 1
38 1
39 1
# Trial-level IRQ exclusions: keep trials from retained participants and
# non-catch items, then flag out-of-range reaction times
# (rt.abs.min.irq .. rt.abs.max.irq).
# NOTE(review): this filters on `ppts$id` while the rest of this section
# uses `ppts.all` — confirm `ppts` is defined and current at this point.
irq.all <- irq.all %>% 
  
  filter(participant_id %in% ppts$id,  # Remove trials from excluded participants
         item_type != "attention"  # Remove catch trials 
  ) %>%
  
  # Exclude extreme rts
  # exclude.relative.by.ppt(reaction_time, rt.rel.sd) %>%
  exclude.absolute.by.ppt(reaction_time, rt.abs.min.irq, rt.abs.max.irq)


# Exclude participants who lost more than cutoff.removed.trials of their
# IRQ trials; the helper returns updated participant and trial tables.
res = exclude.ppt.ex.trials(
  ppts.all, irq.all, cutoff.removed.trials, "ex.irq.ex.trials")

ppts.all <- res$ppts
irq.all <- res$trials

# Retained (non-excluded) IRQ trials
irq <- irq.all %>% filter(excluded==FALSE)

# Tabulate how many trials each exclusion rule removed
summarise.exclusions(irq.all)
Reason Removed (%)
ex.reaction_time.abs.lo 0 0
ex.reaction_time.abs.hi 0 0
ex.ppt.excluded 0 0
—— NA NA
Total Removed 0 0
Retained 720 100

4.2 EFA

Pivot the IRQ question data into (item x ppt response)

# Rebuild the retained-trial set (participants kept in `ppts`)
irq <- irq.all %>%
  filter(participant_id %in% ppts$id)

# Pivot IRQ responses into a participant x item matrix, with columns
# ordered by numeric item_id.
irq.qs <- irq %>%
  select(item_id, response, participant_id) %>%
  pivot_wider(names_from=item_id, values_from=response) %>%
  arrange(participant_id) %>%
  select(-participant_id) %>%
  select(order(as.numeric(colnames(.))))
  # data.frame()

# Reverse-code the reversed questions on the 1-5 scale (x -> 6 - x),
# vectorized over a single named constant instead of three repeated
# magic-index assignments.
# NOTE(review): these are positional column indices; they equal item_ids
# 10/19/33 only if items 1-36 are all present — confirm.
irq.reversed.items <- c(10, 19, 33)
irq.qs[irq.reversed.items] <- 6 - irq.qs[irq.reversed.items]

# irq.qs

# irq.qs

Parallel analysis recommends 2 factors.

# Parallel analysis (psych) to estimate how many factors to retain
fa.parallel(irq.qs)
## In smc, smcs < 0 were set to .0
## In smc, smcs < 0 were set to .0
## In smc, smcs < 0 were set to .0
## Warning in cor.smooth(r): Matrix was not positive definite, smoothing was done
## Warning in fa.stats(r = r, f = f, phi = phi, n.obs = n.obs, np.obs = np.obs, :
## The estimated weights for the factor scores are probably incorrect. Try a
## different factor score estimation method.
## In factor.scores, the correlation matrix is singular, the pseudo inverse is  used
## I was unable to calculate the factor score weights, factor loadings used instead

## Parallel analysis suggests that the number of factors =  2  and the number of components =  2

Re-performing the FA with 4 factors produces loadings that look similar to the original paper's (in the same order, i.e. MR1=verbal, MR2=orthographic, MR3=visual, MR4=manipulation).

# 4-factor EFA on the item-response matrix, mirroring the original
# IRQ paper's extraction settings
irq.fa.4 <- psych::fa(irq.qs, nfactors=4,
                      rotate="oblimin",  # Oblique rotation as in original paper
                      fm="minres"  # Default minimum residual factoring method
                      )
## In smc, smcs < 0 were set to .0
## In smc, smcs < 0 were set to .0
## In smc, smcs < 0 were set to .0
## Loading required namespace: GPArotation
## Warning in cor.smooth(r): Matrix was not positive definite, smoothing was done
## Warning in fa.stats(r = r, f = f, phi = phi, n.obs = n.obs, np.obs = np.obs, :
## The estimated weights for the factor scores are probably incorrect. Try a
## different factor score estimation method.
## In factor.scores, the correlation matrix is singular, the pseudo inverse is  used
## I was unable to calculate the factor score weights, factor loadings used instead
# Item loadings on the 4 extracted factors (36 critical items)
loadings <- irq.fa.4$loadings

loadings <- as.data.frame(loadings[1:36,1:4])
loadings$item_id <- row.names(loadings)

# Label each item with its factor from the original paper.
# NOTE(review): assumes rows are ordered 10 Visual, 12 Verbal,
# 6 Orthographic, 8 Manipulation items — confirm this matches the
# item_id ordering of the loadings matrix.
loadings$item.factor <- c(rep("Visual", 10), rep("Verbal", 12), rep("Orthographic", 6), rep("Manipulation", 8))

# Long format: one row per (item, extracted factor) loading
loadings.pivot <- loadings %>%
  pivot_longer(cols=c(MR1, MR2, MR3, MR4),
               names_to="Factor", values_to="Loading New")

# Mean loading (+/- SE) of each original-paper item group on each
# extracted factor — used to interpret MR1-MR4
loadings.pivot %>%
  ggplot(aes(x = Factor, y = `Loading New`, color=item.factor)) + 
  stat_summary(fun.data=mean_se, geom="pointrange")

  # geom_point(stat="identity", position="dodge")

# loadings

Compare loadings for each item between original and new data analysis. The match looks fairly good visually, the same set of items in the high end of each axis.

# Rename MR1-MR4 to the interpreted factor names.
# NOTE(review): mapping MR1=Verbal, MR2=Orthographic, MR3=Visual,
# MR4=Manipulation is inferred from the loading plot above — confirm
# before trusting downstream labels.
colnames(loadings) <- c("Verbal", "Orthographic", "Visual", "Manipulation", "item_id")

# Original-paper factor labels, including the 3 filler items
irq.og$item.factor <- c(rep("Visual", 10), rep("Verbal", 12), rep("Orthographic", 6), rep("Manipulation", 8), rep("Filler", 3))

irq.og.loadings <- irq.og %>%
  filter(item_type == "critical") %>%  # remove fillers
  select(item_id, item.factor, Visual, Verbal, Orthographic, Manipulation)

# Long format for the new-analysis loadings
loadings.pivot <- loadings %>%
  select(Visual, Verbal, Orthographic, Manipulation, item_id) %>%
  pivot_longer(cols=c(Visual, Verbal, Orthographic, Manipulation),
               names_to="Factor", values_to="Loading New")

# Long format for the original-paper loadings
loadings.og.pivot <- irq.og.loadings %>%
  pivot_longer(cols=c(Visual, Verbal, Orthographic, Manipulation),
               names_to="Factor", values_to="Loading OG")

# Join on item_id + Factor (common columns) for the comparison plot
loadings.all <- merge(loadings.pivot, loadings.og.pivot)

# Original vs new loading for each item, faceted by factor
loadings.all %>%
  ggplot(aes(x = `Loading OG`, y = `Loading New`, color=item.factor)) + 
  geom_point() + 
  geom_text(aes(label=item_id), nudge_x = 0.03, nudge_y = 0.03) +
  facet_wrap(facets=vars(Factor)) + 
  theme_minimal()

Merge IRQ data onto ppts:

I’m using EFA fit as it seems to be what was used in the original paper.

# Merge ppt scores
# Per-participant EFA factor scores, renamed to the interpreted factors
ppt.scores <- as.data.frame(irq.fa.4$scores)
colnames(ppt.scores) <- c("Verbal", "Orthographic", "Visual", "Manipulation")

# NOTE(review): cbind assumes `ppts` rows and the factor-score rows are in
# the same participant order — irq.qs was arranged by participant_id;
# confirm `ppts` is sorted the same way.
ppts <- cbind(ppts,ppt.scores)

4.3 Get ppt variance measures

4.3.1 Participant Random Slopes

# PV accuracy model with the maximal by-participant random-slope structure
# (all effects up to the 3-way interaction) plus by-item intercepts.
# Used only to extract per-participant slopes, so the singular-fit and
# non-convergence warnings below are tolerated here.
m.vipv.pv.acc.ixn <- glmer(
    pv.accuracy ~ 1 + pv.modality + wm.modality + wm.load + 
      pv.modality:wm.modality + wm.modality:wm.load + pv.modality:wm.load +
      pv.modality:wm.modality:wm.load +
      (
        pv.modality + wm.modality + wm.load + 
      pv.modality:wm.modality + wm.modality:wm.load + pv.modality:wm.load +
        pv.modality:wm.modality:wm.load | participant_id) + (1 | item_id),
    data = vipv %>% filter(wm.accuracy == 1),
    family="binomial", control=optimxglmerControl)
## Warning in optwrap(optimizer, devfun, start, rho$lower, control = control, :
## convergence code 1 from optimx: none
## Warning in optimx.check(par, optcfg$ufn, optcfg$ugr, optcfg$uhess, lower, : Parameters or bounds appear to have different scalings.
##   This can cause poor performance in optimization. 
##   It is important for derivative free methods like BOBYQA, UOBYQA, NEWUOA.
## Warning in optwrap(optimizer, devfun, start, rho$lower, control = control, :
## convergence code 1 from optimx: none
## boundary (singular) fit: see help('isSingular')
# Full model summary; note the singular-fit and convergence warnings above
summary(m.vipv.pv.acc.ixn)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: 
## pv.accuracy ~ 1 + pv.modality + wm.modality + wm.load + pv.modality:wm.modality +  
##     wm.modality:wm.load + pv.modality:wm.load + pv.modality:wm.modality:wm.load +  
##     (pv.modality + wm.modality + wm.load + pv.modality:wm.modality +  
##         wm.modality:wm.load + pv.modality:wm.load + pv.modality:wm.modality:wm.load |  
##         participant_id) + (1 | item_id)
##    Data: vipv %>% filter(wm.accuracy == 1)
## Control: optimxglmerControl
## 
##      AIC      BIC   logLik deviance df.resid 
##    466.1    676.1   -188.0    376.1      741 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -4.5294  0.0259  0.0676  0.1542  2.1911 
## 
## Random effects:
##  Groups         Name                                 Variance Std.Dev. Corr 
##  item_id        (Intercept)                          10.940   3.308         
##  participant_id (Intercept)                           6.002   2.450         
##                 pv.modalityau                         4.211   2.052    -0.87
##                 wm.modalityau                         7.817   2.796    -0.77
##                 wm.load3                              8.404   2.899    -0.92
##                 pv.modalityau:wm.modalityau           4.032   2.008     0.66
##                 wm.modalityau:wm.load3               16.235   4.029     0.85
##                 pv.modalityau:wm.load3               14.810   3.848     0.96
##                 pv.modalityau:wm.modalityau:wm.load3 23.197   4.816    -0.57
##                                     
##                                     
##                                     
##                                     
##   0.95                              
##   0.87  0.82                        
##  -0.88 -0.80 -0.54                  
##  -0.98 -0.93 -0.78  0.94            
##  -0.94 -0.81 -0.93  0.76  0.90      
##   0.72  0.56  0.36 -0.94 -0.82 -0.67
## Number of obs: 786, groups:  item_id, 48; participant_id, 20
## 
## Fixed effects:
##                                      Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                            6.1635     1.6807   3.667 0.000245 ***
## pv.modalityau                         -1.7539     1.7290  -1.014 0.310391    
## wm.modalityau                         -0.4129     1.4927  -0.277 0.782046    
## wm.load3                              -1.2711     1.3769  -0.923 0.355930    
## pv.modalityau:wm.modalityau            0.9880     1.6144   0.612 0.540524    
## wm.modalityau:wm.load3                 2.6801     2.3338   1.148 0.250812    
## pv.modalityau:wm.load3                 2.6345     1.8241   1.444 0.148663    
## pv.modalityau:wm.modalityau:wm.load3  -3.5109     2.7422  -1.280 0.200427    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) pv.mdl wm.mdl wm.ld3 pv.m:. wm.:.3 pv.:.3
## pv.modality -0.719                                          
## wm.modality -0.569  0.531                                   
## wm.load3    -0.674  0.603  0.676                            
## pv.mdlty:w.  0.511 -0.556 -0.860 -0.563                     
## wm.mdlty:.3  0.432 -0.393 -0.675 -0.585  0.598              
## pv.mdlty:.3  0.591 -0.581 -0.565 -0.829  0.574  0.530       
## pv.mdl:.:.3 -0.363  0.373  0.546  0.461 -0.639 -0.867 -0.626
## optimizer (optimx) convergence code: 1 (none)
## boundary (singular) fit: see help('isSingular')
## Parameters or bounds appear to have different scalings.
##   This can cause poor performance in optimization. 
##   It is important for derivative free methods like BOBYQA, UOBYQA, NEWUOA.

We take the random slope for the domainVisual effect for each participant in a linear model.

# By-participant random effects from the PV-accuracy random-slope model
ppt.ranef <- ranef(m.vipv.pv.acc.ixn)$participant_id
ppt.ranef$participant_id <- rownames(ppt.ranef)

# Participant-specific 3-way interaction = fixed effect + random deviation
ppt.ranef$ixn.full <-
  ppt.ranef$`pv.modalityau:wm.modalityau:wm.load3` +
  fixef(m.vipv.pv.acc.ixn)["pv.modalityau:wm.modalityau:wm.load3"]

There is some variance in the effect by participant, with participants varying from -6.1653481 to 4.1852347.

# Each participant's 3-way interaction effect, sorted descending; the red
# dashed line marks the population-level fixed effect.
ppt.ranef %>%
  ggplot(aes(x = reorder(participant_id, -ixn.full), y = ixn.full)) +
  geom_hline(
    yintercept = fixef(m.vipv.pv.acc.ixn)["pv.modalityau:wm.modalityau:wm.load3"],
    color = "red", linetype = "dashed"
  ) +
  geom_point() +
  theme_minimal() +
  theme(axis.text.x = element_blank()) +
  labs(
    title = "PV Accuracy Random Slopes",
    subtitle = "Participant Random Slopes for 3-way Interaction Effect",
    x = "Participant Id",
    y = "3-way Interaction Effect (Fixed Effect + Random Slope)",
    caption = "Red dashed line represents fixed effect"
  )

# Attach the slope columns to the participant table (keep only matched ppts)
ppts <- merge(ppts, ppt.ranef, by = "participant_id", all.y = FALSE)

4.4 Correlation of IRQ and Random Slopes

4.4.1 3-way interaction

Correlations of IRQ w/ ppt random-slopes don’t show much of interest.

# Each IRQ factor score against the participant 3-way interaction slope,
# with a per-facet OLS fit line
ppts %>%
  pivot_longer(
    cols = c(Visual, Verbal, Orthographic, Manipulation),
    names_to = "factor", values_to = "score"
  ) %>%
  ggplot(aes(x = score, y = ixn.full)) +
  geom_point() +
  geom_smooth(method = "lm", formula = "y~x") +
  facet_wrap(facets = vars(factor)) +
  theme_minimal() +
  labs()  # axis labels intentionally left at defaults

Simple linear models show no effect of either variable.

# OLS of the interaction slope on Visual and Manipulation scores, separately
m.ixn.visual <- lm(ixn.full ~ Visual, data = ppts)
m.ixn.manip <- lm(ixn.full ~ Manipulation, data = ppts)
summary(m.ixn.visual)
summary(m.ixn.manip)
## 
## Call:
## lm(formula = ixn.full ~ Visual, data = ppts)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -3.3426 -1.8269 -0.6854  0.6660  6.8438 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept) -2.91471    0.66079  -4.411 0.000337 ***
## Visual       0.06123    0.17879   0.342 0.735958    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 2.955 on 18 degrees of freedom
## Multiple R-squared:  0.006474,   Adjusted R-squared:  -0.04872 
## F-statistic: 0.1173 on 1 and 18 DF,  p-value: 0.736
## 
## 
## Call:
## lm(formula = ixn.full ~ Manipulation, data = ppts)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -3.3340 -1.9139 -0.6915  0.5977  7.1268 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  -2.91471    0.66278  -4.398 0.000347 ***
## Manipulation -0.01703    0.18418  -0.092 0.927362    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 2.964 on 18 degrees of freedom
## Multiple R-squared:  0.0004746,  Adjusted R-squared:  -0.05505 
## F-statistic: 0.008547 on 1 and 18 DF,  p-value: 0.9274

4.4.2 WM modality

We hypothesized that verbalizers would show a bigger main effect of WM modality, because auditory info would interfere with their default representation strategy. However, that relationship doesn’t look clear in these data.

Could be a negative relationship between Orthographic and modality.au effect (on pv accuracy).

# Each IRQ factor score against the participant WM-modality slope,
# with a per-facet OLS fit line
ppts %>%
  pivot_longer(
    cols = c(Visual, Verbal, Orthographic, Manipulation),
    names_to = "factor", values_to = "score"
  ) %>%
  ggplot(aes(x = score, y = wm.modalityau)) +
  geom_point() +
  geom_smooth(method = "lm", formula = "y~x") +
  facet_wrap(facets = vars(factor)) +
  theme_minimal() +
  labs()  # axis labels intentionally left at defaults

Simple linear models show no effect of either variable.

# OLS of the WM-modality slope on Verbal and Orthographic scores, separately
m.wmau.verbal <- lm(wm.modalityau ~ Verbal, data = ppts)
m.wmau.ortho <- lm(wm.modalityau ~ Orthographic, data = ppts)
summary(m.wmau.verbal)
summary(m.wmau.ortho)
## 
## Call:
## lm(formula = wm.modalityau ~ Verbal, data = ppts)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -3.6123 -0.9973  0.0761  1.1443  2.7429 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)
## (Intercept)  0.05471    0.42143   0.130    0.898
## Verbal      -0.03655    0.07276  -0.502    0.622
## 
## Residual standard error: 1.885 on 18 degrees of freedom
## Multiple R-squared:  0.01382,    Adjusted R-squared:  -0.04097 
## F-statistic: 0.2523 on 1 and 18 DF,  p-value: 0.6216
## 
## 
## Call:
## lm(formula = wm.modalityau ~ Orthographic, data = ppts)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -3.3681 -0.6110 -0.0879  0.7227  2.8988 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)
## (Intercept)   0.05471    0.39709   0.138    0.892
## Orthographic -0.13000    0.08128  -1.599    0.127
## 
## Residual standard error: 1.776 on 18 degrees of freedom
## Multiple R-squared:  0.1244, Adjusted R-squared:  0.0758 
## F-statistic: 2.558 on 1 and 18 DF,  p-value: 0.1271

4.4.3 PV modality

Exploratory.

Again, a high Orthographic loading correlates with a negative effect of pv-au (less accurate on auditory stimuli). It is not clear why this would be the case.

# Each IRQ factor score against the participant PV-modality slope,
# with a per-facet OLS fit line
ppts %>%
  pivot_longer(
    cols = c(Visual, Verbal, Orthographic, Manipulation),
    names_to = "factor", values_to = "score"
  ) %>%
  ggplot(aes(x = score, y = pv.modalityau)) +
  geom_point() +
  geom_smooth(method = "lm", formula = "y~x") +
  facet_wrap(facets = vars(factor)) +
  theme_minimal() +
  labs()  # axis labels intentionally left at defaults

A simple linear model shows no effect of the Orthographic score.

# OLS of the PV-modality slope on the Orthographic score
m.pvau.ortho <- lm(pv.modalityau ~ Orthographic, data = ppts)
summary(m.pvau.ortho)
## 
## Call:
## lm(formula = pv.modalityau ~ Orthographic, data = ppts)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -2.1403 -0.5573 -0.2812  0.4885  2.4288 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)
## (Intercept)   0.14706    0.28736   0.512    0.615
## Orthographic -0.09539    0.05882  -1.622    0.122
## 
## Residual standard error: 1.285 on 18 degrees of freedom
## Multiple R-squared:  0.1275, Adjusted R-squared:  0.07902 
## F-statistic:  2.63 on 1 and 18 DF,  p-value: 0.1222

4.4.4 PV modality x WM modality

Exploratory.

Orthographic shows the steepest relationship here too, but all seem to be pushed about by these two extreme points.

# Each IRQ factor score against the PV modality x WM modality slope,
# with a per-facet OLS fit line
ppts %>%
  pivot_longer(
    cols = c(Visual, Verbal, Orthographic, Manipulation),
    names_to = "factor", values_to = "score"
  ) %>%
  ggplot(aes(x = score, y = `pv.modalityau:wm.modalityau`)) +
  geom_point() +
  geom_smooth(method = "lm", formula = "y~x") +
  facet_wrap(facets = vars(factor)) +
  theme_minimal() +
  labs()  # axis labels intentionally left at defaults

Simple linear models show no effect of either variable.

# OLS of the PV x WM modality slope on Manipulation and Orthographic scores
m.pvwm.manip <- lm(`pv.modalityau:wm.modalityau` ~ Manipulation, data = ppts)
m.pvwm.ortho <- lm(`pv.modalityau:wm.modalityau` ~ Orthographic, data = ppts)
summary(m.pvwm.manip)
summary(m.pvwm.ortho)
## 
## Call:
## lm(formula = `pv.modalityau:wm.modalityau` ~ Manipulation, data = ppts)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -2.8879 -0.3898  0.2314  0.7771  1.5262 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)
## (Intercept)  -0.15753    0.27271  -0.578    0.571
## Manipulation  0.03241    0.07578   0.428    0.674
## 
## Residual standard error: 1.22 on 18 degrees of freedom
## Multiple R-squared:  0.01006,    Adjusted R-squared:  -0.04494 
## F-statistic: 0.1829 on 1 and 18 DF,  p-value: 0.674
## 
## 
## Call:
## lm(formula = `pv.modalityau:wm.modalityau` ~ Orthographic, data = ppts)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -2.7611 -0.2923  0.2208  0.7025  1.7898 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)
## (Intercept)  -0.15753    0.26437  -0.596    0.559
## Orthographic  0.06281    0.05411   1.161    0.261
## 
## Residual standard error: 1.182 on 18 degrees of freedom
## Multiple R-squared:  0.06965,    Adjusted R-squared:  0.01796 
## F-statistic: 1.348 on 1 and 18 DF,  p-value: 0.2609